//-------------------------------------------------------------------------------------------------
//
// Copyright (c) Microsoft Corporation. All rights reserved.
//
// Implements the page manager for our NorlsAllocator instances
//
//-------------------------------------------------------------------------------------------------
/* Page allocation heap. Allocates and frees pages
 * or groups of pages. Allocation must be a multiple
 * of the system page size. Memory is not zeroed.
 */
// Allocate after the previous allocation instead of looking from the
// beginning of each arena.
#define SPREAD_ALLOCATIONS

#define PAGES_PER_ARENA 128         // 4k system page size => 128*4k = 512KB per arena
#define BIGALLOC_SIZE (128 * 1024)  // more than this alloc (128KB) is not done from an arena.

#define DWORD_BIT_SHIFT 5           // log2 of bits in a DWORD.
#define BITS_DWORD (1 << DWORD_BIT_SHIFT)
#define DWORD_BIT_MASK (BITS_DWORD - 1)

// Returns the system page size; defined elsewhere in the project.
extern size_t GetSystemPageSize();
38 // Store information about a memory arena we allocate pages from.
43 PageArena
* nextArena
; // the arena.
44 void* pages
; // the pages in the arena.
45 size_t size
; // size of the arena.
46 PageArenaType type
; // large allocs and single page allocs have special cased codepaths
47 DWORD used
[PAGES_PER_ARENA
/ BITS_DWORD
]; // bit map of in-use pages in this arena.
48 DWORD committed
[PAGES_PER_ARENA
/ BITS_DWORD
]; // bit map of committed pages in this arena.
51 bool OwnsPage(const void* p
) const
53 return (p
>= pages
&& p
< (BYTE
*)pages
+ size
);
56 bool IsPageCommitted (unsigned iPage
) const
58 return TestPage(committed
, iPage
);
60 bool IsPageUsed (unsigned iPage
) const
62 return TestPage(used
, iPage
);
65 void MarkPageUsed (unsigned iPage
)
70 void MarkPageCommitted (unsigned iPage
)
72 SetPage(committed
, iPage
);
75 void ClearPageUsed (unsigned iPage
)
77 ClearPage(used
, iPage
);
80 void ClearPageCommitted (unsigned iPage
)
82 ClearPage(committed
, iPage
);
85 bool HasUsedPages() const;
87 size_t GetAddressSpaceSize() const
92 void FreeAddressSpace();
93 void* AllocPages(unsigned int cPages
, PageHeap
& parent
);
94 void* AllocPagesHelper(int iPage
, unsigned int cPages
, PageHeap
& parent
);
98 #ifdef SPREAD_ALLOCATIONS
99 int m_iStartNextAlloc
; // used in PageArena::AllocPages
102 int LookForPages(unsigned int cPages
, int indexPageBegin
, int indexLastValidPage
);
104 void SetPage(_Out_cap_(PAGES_PER_ARENA
>> DWORD_BIT_SHIFT
) DWORD bitvector
[], unsigned index
)
106 VSASSERT(index
< PAGES_PER_ARENA
, "Invalid");
107 bitvector
[index
>> DWORD_BIT_SHIFT
] |= (1 << (index
& DWORD_BIT_MASK
));
110 bool TestPage(DWORD
const bitvector
[], unsigned index
) const
112 VSASSERT(index
< PAGES_PER_ARENA
, "Invalid");
113 return bitvector
[index
>> DWORD_BIT_SHIFT
] & (1 << (index
& DWORD_BIT_MASK
));
116 void ClearPage(_Out_cap_(PAGES_PER_ARENA
>> DWORD_BIT_SHIFT
) DWORD bitvector
[], unsigned index
)
118 VSASSERT(index
< PAGES_PER_ARENA
, "Invalid");
119 bitvector
[index
>> DWORD_BIT_SHIFT
] &= ~(1 << (index
& DWORD_BIT_MASK
));
123 struct SinglePageArena
: public PageArena
125 int freePageStack
[PAGES_PER_ARENA
]; // stack of free pages available in singlePageAlloc case
126 int topOfFreePageStack
; // -1 when there are no free pages, will initially be PAGES_PER_ARENA -1
130 int NumberOfFreePagesAvailable()
132 return topOfFreePageStack
+ 1;
139 static void StaticInit();
141 void* AllocPages( _In_
size_t sz
);
142 void FreePages(ProtectedEntityFlagsEnum entity
, _Post_invalid_
void* p
, size_t sz
);
143 void FreeAllPages(bool checkLeaks
= true);
145 // When previously committed pages are freed, they are merely marked
146 // as unused, not decommitted. Call this to decommit any unused pages.
147 bool DecommitUnusedPages();
148 // After all unsused pages in an arena are decommitted (by calling DecommitUnusedPages),
149 // the arena may be left with no used pages. In that case it can be freed,
150 // returning address space to the process. Call this to release any unsued arenas.
151 void FreeUnusedArenas();
152 // [....] this call first decommits unused pages and then frees unused arenas.
153 void ShrinkUnusedResources();
155 static size_t pageSize
; // The system page size.
157 unsigned GetCurrentUseSize() const
159 return (unsigned)(m_pageCurUse
* pageSize
);
161 unsigned GetMaxUseSize() const
163 return (unsigned)(m_pageMaxUse
* pageSize
);
165 unsigned GetCurrentReserveSize() const
167 return (unsigned)(m_pageCurReserve
* pageSize
);
169 unsigned GetMaxReserveSize() const
171 return (unsigned)(m_pageMaxReserve
* pageSize
);
174 PageArena
* FindArena(const void * p
);
177 CTinyLock lock
; // This is the lock mechanism for thread safety.
179 PageArena
* CreateArena(PageArenaType type
, size_t sz
);
181 template <typename T
>
182 void RemoveArena(const T
* goingAway
, T
*& containingArenaList
, T
*& containingArenaListLast
);
183 bool DecommitUnusedPagesFromArenaList(PageArena
* list
);
185 void FreePagesHelper(ProtectedEntityFlagsEnum entity
, PageArena
* arena
, _Post_invalid_
void* p
, size_t sz
);
187 // special case allocation/free behavior for large allocations
188 void * LargeAlloc(size_t sz
);
189 void LargeFree(void * p
, size_t sz
);
191 // special case allocation/free behavior for single page allocations
192 void* SinglePageAlloc();
193 void SinglePageFree(ProtectedEntityFlagsEnum entity
, _Post_invalid_
void* p
);
195 SinglePageArena
* singlePageArenaList
; // List of memory arenas exclusively for single page allocs
196 SinglePageArena
* singlePageArenaLast
; // Last memory arena in list.
198 // used to efficiently find the arena a freed memory address belonged to
199 std::map
<void *, SinglePageArena
*> addressToSinglePageArenaMap
;
200 // used to efficiently find an arena to make a new allocation from
201 std::queue
<SinglePageArena
*> singlePageArenasWithFreePages
;
203 PageArena
* arenaList
; // List of memory arenas.
204 PageArena
* arenaLast
; // Last memory arena in list.
206 ProtectedEntityFlagsEnum whatIsProtected
;
208 static int pageShift
; // log2 of the page size
209 static bool reliableCommit
; // Commit of memory protects it correctly even if already committed
211 size_t m_pageCurUse
, m_pageMaxUse
;
212 size_t m_pageCurReserve
, m_pageMaxReserve
;